In [1]:
class SimpleClass():
    def __init__(self, str_input):
        print("SIMPLE" + str_input)
In [2]:
class ExtendedClass(SimpleClass):
    def __init__(self):
        print('EXTENDED')
Unless specified otherwise, the child class uses its own initialization method.
In [3]:
s = ExtendedClass()
If we want to use the initialization from the parent class as well, we can do that by calling:
super().__init__()
In [4]:
class ExtendedClass(SimpleClass):
    def __init__(self):
        super().__init__(" My String")
        print('EXTENDED')
In [5]:
s = ExtendedClass()
In [6]:
class Operation():
    """
    An Operation is a node in a "Graph". TensorFlow also uses this concept of a Graph.

    This Operation class will be inherited by other classes that actually compute the specific
    operation, such as adding or matrix multiplication.
    """

    def __init__(self, input_nodes = []):
        """
        Initialize an Operation
        """
        # The list of input nodes
        self.input_nodes = input_nodes

        # Initialize the list of nodes consuming this node's output
        self.output_nodes = []

        # For every input node, append this operation (self) to the list of
        # the consumers of that input node
        for node in input_nodes:
            node.output_nodes.append(self)

        # There will be a global default graph (TensorFlow works this way).
        # Append this operation to the list of operations in the currently active default graph.
        _default_graph.operations.append(self)

    def compute(self):
        """
        This is a placeholder method. It will be overridden by the specific operation
        that inherits from this class.
        """
        pass
In [7]:
class add(Operation):

    def __init__(self, x, y):
        super().__init__([x, y])

    def compute(self, x_var, y_var):
        self.inputs = [x_var, y_var]
        return x_var + y_var
In [8]:
class multiply(Operation):

    def __init__(self, a, b):
        super().__init__([a, b])

    def compute(self, a_var, b_var):
        self.inputs = [a_var, b_var]
        return a_var * b_var
In [9]:
class matmul(Operation):

    def __init__(self, a, b):
        super().__init__([a, b])

    def compute(self, a_mat, b_mat):
        self.inputs = [a_mat, b_mat]
        return a_mat.dot(b_mat)
In [10]:
class Placeholder():
    """
    A placeholder is a node that needs to be provided with a value in order to compute the output of the Graph.
    In the case of supervised learning, X (input) and Y (output) will require placeholders.
    """

    def __init__(self):
        self.output_nodes = []
        _default_graph.placeholders.append(self)
In [11]:
class Variable():
    """
    A Variable is a changeable parameter of the Graph.
    For a simple neural network, these will be the weights and biases.
    """

    def __init__(self, initial_value = None):
        self.value = initial_value
        self.output_nodes = []
        _default_graph.variables.append(self)
In [12]:
class Graph():

    def __init__(self):
        self.operations = []
        self.placeholders = []
        self.variables = []

    def set_as_default(self):
        """
        Sets this Graph instance as the Global Default Graph
        """
        global _default_graph
        _default_graph = self
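Note that every node's constructor reaches for the module-level _default_graph, so nodes can only be created after some Graph has been set as the default. A quick sanity check (a hypothetical snippet, not part of the original notebook):

# Creating a node before any set_as_default() call fails,
# because the module-level _default_graph does not exist yet.
try:
    Variable(5)
except NameError as err:
    print(err)  # name '_default_graph' is not defined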
In [13]:
g = Graph()
In [14]:
g.set_as_default()
In [15]:
print("Operations:")
print(g.operations)
print("Placeholders:")
print(g.placeholders)
print("Variables:")
print(g.variables)
In [16]:
A = Variable(10)
In [17]:
print("Operations:")
print(g.operations)
print("Placeholders:")
print(g.placeholders)
print("Variables:")
print(g.variables)
In [18]:
b = Variable(1)
In [19]:
print("Operations:")
print(g.operations)
print("Placeholders:")
print(g.placeholders)
print("Variables:")
print(g.variables)
In [20]:
# Will be filled out later
x = Placeholder()
In [21]:
print("Operations:")
print(g.operations)
print("Placeholders:")
print(g.placeholders)
print("Variables:")
print(g.variables)
In [22]:
y = multiply(A, x)
In [23]:
print("Operations:")
print(g.operations)
print("Placeholders:")
print(g.placeholders)
print("Variables:")
print(g.variables)
In [24]:
z = add(y, b)
In [25]:
print("Operations:")
print(g.operations)
print("Placeholders:")
print(g.placeholders)
print("Variables:")
print(g.variables)
In [26]:
import numpy as np
More details about tree post-order traversal: https://en.wikipedia.org/wiki/Tree_traversal#Post-order_(LRN)
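As a quick standalone illustration (a sketch independent of the graph classes in this notebook), post-order visits a node's children before the node itself:

# Post-order on a tiny nested-tuple tree: (op, left, right)
tree = ('+', ('*', 'A', 'x'), 'b')   # represents A*x + b

def postorder(node):
    if isinstance(node, tuple):
        op, left, right = node
        return postorder(left) + postorder(right) + [op]
    return [node]

print(postorder(tree))  # ['A', 'x', '*', 'b', '+']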
In [27]:
def traverse_postorder(operation):
    """
    PostOrder Traversal of Nodes.
    Makes sure computations are done in the correct order (Ax first, then Ax + b).
    """
    nodes_postorder = []

    def recurse(node):
        if isinstance(node, Operation):
            for input_node in node.input_nodes:
                recurse(input_node)
        nodes_postorder.append(node)

    recurse(operation)
    return nodes_postorder
In [28]:
class Session:

    def run(self, operation, feed_dict = {}):
        """
        operation: The operation to compute
        feed_dict: Dictionary mapping placeholders to input values (the data)
        """
        # Puts nodes in correct order
        nodes_postorder = traverse_postorder(operation)
        print("Post Order:")
        print(nodes_postorder)

        for node in nodes_postorder:

            if type(node) == Placeholder:
                node.output = feed_dict[node]
            elif type(node) == Variable:
                node.output = node.value
            else:  # Operation
                node.inputs = [input_node.output for input_node in node.input_nodes]
                node.output = node.compute(*node.inputs)

            # Convert lists to numpy arrays
            if type(node.output) == list:
                node.output = np.array(node.output)

        # Return the requested node value
        return operation.output
In [29]:
sess = Session()
In [30]:
result = sess.run(operation = z,
                  feed_dict = {x : 10})
The printed post order should look like: Variable (A), Placeholder (x), multiply operation (Ax), Variable (b), add operation (Ax + b).
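We can double-check this order directly with traverse_postorder (a quick hypothetical check, not an original notebook cell):

print([type(node).__name__ for node in traverse_postorder(z)])
# ['Variable', 'Placeholder', 'multiply', 'Variable', 'add']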
In [31]:
result
Out[31]:
In [32]:
10 * 10 + 1
Out[32]:
In [33]:
# Running just y = Ax
# The post order should only go up to the multiply node (Ax)
result = sess.run(operation = y,
                  feed_dict = {x : 10})
In [34]:
result
Out[34]:
Looks like we did it!
In [35]:
g = Graph()
g.set_as_default()
A = Variable([[10, 20], [30, 40]])
b = Variable([1, 1])
x = Placeholder()
y = matmul(A, x)
z = add(y, b)
In [36]:
sess = Session()
In [37]:
result = sess.run(operation = z,
                  feed_dict = {x : 10})
In [38]:
result
Out[38]:
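For reference, because x is fed a scalar here, np.dot with a 0-d input reduces to elementwise multiplication, so the result can be checked by hand (a hypothetical cell, not from the original notebook):

# [[10, 20], [30, 40]] * 10 + [1, 1] broadcasts to [[101, 201], [301, 401]]
print(np.array([[10, 20], [30, 40]]).dot(10) + np.array([1, 1]))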
In [39]:
import matplotlib.pyplot as plt
%matplotlib inline
In [40]:
# Defining the sigmoid function
def sigmoid(z):
    return 1 / (1 + np.exp(-z))
In [41]:
sample_z = np.linspace(-10, 10, 100)
sample_a = sigmoid(sample_z)
In [42]:
plt.figure(figsize = (8, 8))
plt.title("Sigmoid")
plt.plot(sample_z, sample_a)
Out[42]:
In [43]:
class Sigmoid(Operation):

    def __init__(self, z):
        # z is the input node
        super().__init__([z])

    def compute(self, z_val):
        return 1 / (1 + np.exp(-z_val))
In [44]:
from sklearn.datasets import make_blobs
In [45]:
# Creating 50 samples divided into 2 blobs with 2 features
data = make_blobs(n_samples = 50,
                  n_features = 2,
                  centers = 2,
                  random_state = 75)
In [46]:
data
Out[46]:
In [47]:
features = data[0]
plt.scatter(features[:, 0], features[:, 1])
Out[47]:
In [48]:
labels = data[1]
plt.scatter(x = features[:, 0],
            y = features[:, 1],
            c = labels,
            cmap = 'coolwarm')
Out[48]:
In [49]:
# DRAW A LINE THAT SEPARATES THE CLASSES
x = np.linspace(0, 11, 10)
y = -x + 5
plt.scatter(features[:, 0],
            features[:, 1],
            c = labels,
            cmap = 'coolwarm')
plt.plot(x, y)
Out[49]:
This line corresponds to w . x + b = 0 with w = [1, 1] and b = -5, i.e. x1 + x2 - 5 = 0. If the result of w . x + b is greater than 0, the point gets label 1; if it is less than 0, it gets label 0.
In [50]:
z = np.array([1, 1]).dot(np.array([[8], [10]])) - 5
print(z)
In [51]:
a = 1 / (1 + np.exp(-z))
print(a)
Or if we have (2, -10):
In [52]:
z = np.array([1, 1]).dot(np.array([[2], [-10]])) - 5
print(z)
In [53]:
a = 1 / (1 + np.exp(-z))
print(a)
In [54]:
g = Graph()
In [55]:
g.set_as_default()
In [56]:
x = Placeholder()
In [57]:
w = Variable([1, 1])
In [58]:
b = Variable(-5)
In [59]:
z = add(matmul(w, x), b)
In [60]:
a = Sigmoid(z)
In [61]:
sess = Session()
In [62]:
sess.run(operation = a,
         feed_dict = {x : [8, 10]})
Out[62]:
In [63]:
sess.run(operation = a,
         feed_dict = {x : [2, -10]})
Out[63]:
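To wrap up, we could package this into a small helper that turns the sigmoid activation into a hard class label (a hypothetical convenience function, not part of the original notebook; 0.5 is the usual threshold for a sigmoid output):

def classify(point):
    # Feed a 2D point into the graph and threshold the sigmoid output at 0.5
    activation = sess.run(operation = a, feed_dict = {x : point})
    return 1 if activation >= 0.5 else 0

print(classify([8, 10]))   # 1 (above the line)
print(classify([2, -10]))  # 0 (below the line)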